if (ovl) {
// generate MCA.
panic("Tlb conflict!!");
- return;
+ return -1;
}
thash_purge_and_insert(hcb, &entry);
}else if(cmd == MMU_MACHPHYS_UPDATE){
#include <public/hvm/ioreq.h>
#include <asm/mm.h>
#include <asm/vmx.h>
+#include <public/event_channel.h>
/*
struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
struct vcpu *v = current;
vcpu_iodata_t *vio;
ioreq_t *p;
- unsigned long addr;
vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
struct vcpu *v = current;
vcpu_iodata_t *vio;
ioreq_t *p;
- unsigned long addr;
vio = get_vio(v->domain, v->vcpu_id);
if (vio == 0) {
{
REGS *regs;
IA64_BUNDLE bundle;
- int slot, dir, inst_type;
+ int slot, dir=0, inst_type;
size_t size;
u64 data, value,post_update, slot1a, slot1b, temp;
INST64 inst;
static struct ia64_pal_retval
pal_vm_page_size(VCPU *vcpu){
}
-
void
pal_emul( VCPU *vcpu) {
UINT64 gr28;
/*
* Update the checked last_itc.
*/
+
+extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
+ UINT64 vector,REGS *regs);
static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
{
vtm->last_itc = cur_itc;
if (vector & ~0xff) {
DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
- return;
+ return -1;
}
local_irq_save(spsr);
ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
uint64_t guest_read_vivr(VCPU *vcpu)
{
- int vec, next, h_inservice;
+ int vec, h_inservice;
uint64_t spsr;
local_irq_save(spsr);
vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
}
-vhpi_detection(VCPU *vcpu)
+void vhpi_detection(VCPU *vcpu)
{
uint64_t threshold,vhpi;
tpr_t vtpr;
}
}
-vmx_vexirq(VCPU *vcpu)
+void vmx_vexirq(VCPU *vcpu)
{
static uint64_t vexirq_count=0;
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
+#include <xen/irq.h>
/*
 * Architecture ppn is in 4KB unit while XEN
 * page may be a different size (1 << PAGE_SHIFT).
 */
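/* Illustrative sketch (not part of the patch): the 4KB-unit-to-XEN-page
 * conversion the comment above describes; the helper name and the use of
 * a literal 12 for the architected 4KB shift are assumptions. */
static inline u64 arch_ppn_to_xen_ppn(u64 appn)
{
    return (appn << 12) >> PAGE_SHIFT;  /* 4KB frame number -> XEN pfn */
}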
u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
{
struct domain *d;
- u64 i, xen_gppn, xen_mppn, mpfn;
+ u64 xen_gppn, xen_mppn, mpfn;
if ( domid == DOMID_SELF ) {
d = current->domain;
vhpt->vs->tag_func = machine_ttag;
vhpt->hash = vbase;
vhpt->hash_sz = VCPU_TLB_SIZE/2;
- vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
+ vhpt->cch_buf = (void *)((u64)vbase + vhpt->hash_sz);
vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
vhpt->recycle_notifier = recycle_message;
thash_init(vhpt,VCPU_TLB_SHIFT-1);
tlb->hash_func = machine_thash;
tlb->hash = vbase;
tlb->hash_sz = VCPU_TLB_SIZE/2;
- tlb->cch_buf = (u64)vbase + tlb->hash_sz;
+ tlb->cch_buf = (void *)((u64)vbase + tlb->hash_sz);
tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
tlb->recycle_notifier = recycle_message;
thash_init(tlb,VCPU_TLB_SHIFT-1);
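/*
 * Memory layout implied by the assignments above (a sketch, not patch
 * content): each structure's half of the VCPU_TLB_SIZE area is a hash
 * table followed by a collision-chain buffer that extends to the
 * allocation cursor vcur:
 *
 *   vbase              vbase + hash_sz                 vcur
 *     |------ hash ------|----------- cch_buf -----------|
 */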
u64 psr;
thash_data_t mtlb;
unsigned int cl = tlb->cl;
-
+ unsigned long mtlb_ppn;
mtlb.ifa = tlb->vadr;
mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
//vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
- mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1);
- if (mtlb.ppn == INVALID_MFN)
+ mtlb.ppn = (unsigned long)get_mfn(DOMID_SELF,tlb->ppn, 1);
+ mtlb_ppn=mtlb.ppn;
+ if (mtlb_ppn == INVALID_MFN)
panic("Machine tlb insert with invalid mfn number.\n");
psr = ia64_clear_ic();
u64 machine_thash(PTA pta, u64 va)
{
u64 saved_pta;
- u64 hash_addr, tag;
+ u64 hash_addr;
unsigned long psr;
- struct vcpu *v = current;
- ia64_rr vrr;
saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
psr = ia64_clear_ic();
data.vadr=PAGEALIGN(ifa,data.ps);
data.tc = 1;
data.cl=ISIDE_TLB;
- vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+ vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
data.rid = vrr.rid;
sections.tr = 1;
while (ovl) {
// generate MCA.
panic("Tlb conflict!!");
- return;
+ return IA64_FAULT;
}
thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
data.vadr=PAGEALIGN(ifa,data.ps);
data.tc = 1;
data.cl=DSIDE_TLB;
- vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+ vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
data.rid = vrr.rid;
sections.tr = 1;
sections.tc = 0;
if (ovl) {
// generate MCA.
panic("Tlb conflict!!");
- return;
+ return IA64_FAULT;
}
thash_purge_and_insert(hcb, &data);
return IA64_NO_FAULT;
ia64_rr vrr;
u64 preferred_size;
- vmx_vcpu_get_rr(vcpu, va, &vrr);
+ vmx_vcpu_get_rr(vcpu, va, (UINT64 *)&vrr);
hcb = vmx_vcpu_get_vtlb(vcpu);
va = PAGEALIGN(va,vrr.ps);
preferred_size = PSIZE(vrr.ps);
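/* For reference, under the assumption that vmx_mm_def.h defines them the
 * usual way: PSIZE(ps) == 1UL << ps, and PAGEALIGN(va, ps) masks va down
 * to that boundary, e.g. PAGEALIGN(0x12345, 14) == 0x10000 and
 * PSIZE(14) == 0x4000. */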
data.vadr=PAGEALIGN(ifa,data.ps);
data.tc = 0;
data.cl=ISIDE_TLB;
- vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+ vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
data.rid = vrr.rid;
sections.tr = 1;
sections.tc = 0;
if (ovl) {
// generate MCA.
panic("Tlb conflict!!");
- return;
+ return IA64_FAULT;
}
sections.tr = 0;
sections.tc = 1;
data.vadr=PAGEALIGN(ifa,data.ps);
data.tc = 0;
data.cl=DSIDE_TLB;
- vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+ vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
data.rid = vrr.rid;
sections.tr = 1;
sections.tc = 0;
while (ovl) {
// generate MCA.
panic("Tlb conflict!!");
- return;
+ return IA64_FAULT;
}
sections.tr = 0;
sections.tc = 1;
thash_cb_t *hcb;
ia64_rr vrr;
search_section_t sections;
- thash_data_t data, *ovl;
hcb = vmx_vcpu_get_vtlb(vcpu);
vrr=vmx_vcpu_rr(vcpu,vadr);
sections.tr = 0;
{
PTA vpta;
ia64_rr vrr;
- u64 vhpt_offset,tmp;
+ u64 vhpt_offset;
vmx_vcpu_get_pta(vcpu, &vpta.val);
vrr=vmx_vcpu_rr(vcpu, vadr);
if(vpta.vf){
#include <xen/mm.h>
#include <xen/multicall.h>
#include <xen/hypercall.h>
+#include <public/version.h>
+#include <asm/dom_fw.h>
+#include <xen/domain.h>
+
+extern long do_sched_op(int cmd, unsigned long arg);
void hyper_not_support(void)
vcpu_get_gr_nat(vcpu,17,&r33);
vcpu_get_gr_nat(vcpu,18,&r34);
vcpu_get_gr_nat(vcpu,19,&r35);
- ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+ ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,(u64 *)r34,r35);
vcpu_set_gr(vcpu, 8, ret, 0);
vmx_vcpu_increment_iip(vcpu);
}
static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
{
- int i;
ia64_rr rr;
thash_cb_t *hcb;
hcb = vmx_vcpu_get_vtlb(vcpu);
* to xen heap. Or else, leave to domain itself to decide.
*/
if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
- free_xenheap_page(o_info);
+ free_xenheap_page((void *)o_info);
} else
memset(d->shared_info, 0, PAGE_SIZE);
return 0;
if (!(vp_env_info & VP_OPCODE))
printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
vm_order = get_order(buffer_size);
- printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
+ printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);
vmx_enabled = 1;
no_vti:
u64 status, tmp_base;
if (!vm_buffer) {
- vm_buffer = alloc_xenheap_pages(vm_order);
+ vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
ASSERT(vm_buffer);
printk("vm_buffer: 0x%lx\n", vm_buffer);
}
if (status != PAL_STATUS_SUCCESS) {
printk("ia64_pal_vp_init_env failed.\n");
- return -1;
+ return;
}
if (!__vsa_base)
/* ia64_ivt is function pointer, so need this translation */
ivt_base = (u64) &vmx_ia64_ivt;
printk("ivt_base: 0x%lx\n", ivt_base);
- ret = ia64_pal_vp_create(vpd, ivt_base, 0);
+ ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
if (ret != PAL_STATUS_SUCCESS)
panic("ia64_pal_vp_create failed. \n");
}
void
vmx_save_state(struct vcpu *v)
{
- u64 status, psr;
- u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
+ u64 status;
/* FIXME: about setting of pal_proc_vector... time consuming */
- status = ia64_pal_vp_save(v->arch.privregs, 0);
+ status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS)
panic("Save vp status failed\n");
void
vmx_load_state(struct vcpu *v)
{
- u64 status, psr;
- u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
- u64 pte_xen, pte_vhpt;
- int i;
+ u64 status;
status = ia64_pal_vp_restore(v->arch.privregs, 0);
if (status != PAL_STATUS_SUCCESS)
ASSERT(d != dom0); /* only for non-privileged vti domain */
d->arch.vmx_platform.shared_page_va =
- __va(__gpa_to_mpa(d, IO_PAGE_START));
+ (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
sp = get_sp(d);
//memset((char *)sp,0,PAGE_SIZE);
/* TEMP */
}
-int
+void
inject_guest_interruption(VCPU *vcpu, u64 vec)
{
u64 viva;
* @ Nat Consumption Vector
* Refer to SDM Vol2 Table 5-6 & 8-1
*/
+
static void
ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
{
#include <asm/pgtable.h>
#include <asm/system.h>
+#include <asm/vcpu.h>
+#include <xen/irq.h>
#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
#else
* come through until ia64_eoi() has been done.
*/
vmx_irq_exit();
- if ( wake_dom0 && current != dom0 )
+ if ( wake_dom0 && current->domain != dom0 )
vcpu_wake(dom0->vcpu[0]);
}
* data access can be satisfied though itlb entry for physical
* emulation is hit.
*/
- SW_SELF,0, 0, SW_NOP, 0, 0, 0, SW_P2V,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ {SW_SELF,0, 0, SW_NOP, 0, 0, 0, SW_P2V},
+ {0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 0, 0, 0, 0, 0, 0, 0},
/*
* (it,dt,rt): (0,1,1) -> (1,1,1)
* This kind of transition is found in OSYa.
* (it,dt,rt): (0,1,1) -> (0,0,0)
* This kind of transition is found in OSYa
*/
- SW_NOP, 0, 0, SW_SELF,0, 0, 0, SW_P2V,
+ {SW_NOP, 0, 0, SW_SELF,0, 0, 0, SW_P2V},
/* (1,0,0)->(1,1,1) */
- 0, 0, 0, 0, 0, 0, 0, SW_P2V,
+ {0, 0, 0, 0, 0, 0, 0, SW_P2V},
/*
* (it,dt,rt): (1,0,1) -> (1,1,1)
* This kind of transition usually occurs when Linux returns
* from the low level TLB miss handlers.
* (see "arch/ia64/kernel/ivt.S")
*/
- 0, 0, 0, 0, 0, SW_SELF,0, SW_P2V,
- 0, 0, 0, 0, 0, 0, 0, 0,
+ {0, 0, 0, 0, 0, SW_SELF,0, SW_P2V},
+ {0, 0, 0, 0, 0, 0, 0, 0},
/*
* (it,dt,rt): (1,1,1) -> (1,0,1)
* This kind of transition usually occurs in Linux low level
* (1,1,1)->(1,0,0)
*/
- SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF,
+ {SW_V2P, 0, 0, 0, SW_V2P, SW_V2P, 0, SW_SELF}
};
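/*
 * Indexing note (an assumption inferred from the transition comments
 * above): the table is consulted as mm_switch_table[old][new], where each
 * index packs the (it,dt,rt) PSR bits into a 3-bit value, e.g.:
 *
 *   act = mm_switch_table[MODE_IND(old_psr)][MODE_IND(new_psr)];
 */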
void
physical_mode_init(VCPU *vcpu)
{
- UINT64 psr;
- struct domain * d = vcpu->domain;
-
vcpu->arch.old_rsc = 0;
vcpu->arch.mode_flags = GUEST_IN_PHY;
}
extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
-#if 0
-void
-physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
-{
- u64 psr;
- IA64_PSR vpsr;
- u64 mppn,gppn,mpp1,gpp1;
- struct domain *d;
- static u64 test=0;
- d=vcpu->domain;
- if(test)
- panic("domn physical itlb miss happen\n");
- else
- test=1;
- vpsr.val=vmx_vcpu_get_psr(vcpu);
- gppn=(vadr<<1)>>13;
- mppn = get_mfn(DOMID_SELF,gppn,1);
- mppn=(mppn<<12)|(vpsr.cpl<<7);
- gpp1=0;
- mpp1 = get_mfn(DOMID_SELF,gpp1,1);
- mpp1=(mpp1<<12)|(vpsr.cpl<<7);
-// if(vadr>>63)
-// mppn |= PHY_PAGE_UC;
-// else
-// mppn |= PHY_PAGE_WB;
- mpp1 |= PHY_PAGE_WB;
- psr=ia64_clear_ic();
- ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
- ia64_srlz_i();
- ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
- ia64_stop();
- ia64_srlz_i();
- ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
- ia64_srlz_i();
- ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
- ia64_stop();
- ia64_srlz_i();
- ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
- ia64_srlz_i();
- ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
- ia64_stop();
- ia64_srlz_i();
- ia64_set_psr(psr);
- ia64_srlz_i();
- return;
-}
-#endif
+extern void vmx_switch_rr7(unsigned long, shared_info_t *, void *, void *, void *);
void
physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
switch_mm_mode (vcpu, old_psr, new_psr);
}
- return 0;
+ return;
}
#include <asm/vmx_mm_def.h>
#include <asm/vmx_phy_mode.h>
#include <xen/mm.h>
+#include <asm/vmx_pal.h>
/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
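/* Decoding the constant above against the comment: bits 2 (up), 4 (mfl),
 * 5 (mfh), 15 (pk), 17 (dt), 27 (rt), 35 (mc) and 36 (it) are set;
 * every other PSR field is cleared. */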
0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
- 0x7f00,
+ 0x7f00
};
UINT64 vector,REGS *regs)
{
VCPU *vcpu = current;
- UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
+ UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
panic("Guest nested fault!");
}
IA64FAULT
vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
{
- static int first_time = 1;
struct domain *d = (struct domain *) current->domain;
- struct vcpu *v = (struct domain *) current;
- extern unsigned long running_on_sim;
+ struct vcpu *v = (struct vcpu *) current;
unsigned long i, sal_param[8];
#if 0
case FW_HYPERCALL_EFI_GET_TIME:
{
unsigned long *tv, *tc;
- vcpu_get_gr_nat(v, 32, &tv);
- vcpu_get_gr_nat(v, 33, &tc);
+ vcpu_get_gr_nat(v, 32, (u64 *)&tv);
+ vcpu_get_gr_nat(v, 33, (u64 *)&tc);
printf("efi_get_time(%p,%p) called...",tv,tc);
- tv = __va(translate_domain_mpaddr(tv));
- if (tc) tc = __va(translate_domain_mpaddr(tc));
- regs->r8 = (*efi.get_time)(tv,tc);
+ tv = __va(translate_domain_mpaddr((unsigned long)tv));
+ if (tc) tc = __va(translate_domain_mpaddr((unsigned long)tc));
+ regs->r8 = (*efi.get_time)((efi_time_t *)tv,(efi_time_cap_t *)tc);
printf("and returns %lx\n",regs->r8);
}
break;
die_if_kernel("bug check", regs, iim);
vmx_reflect_interruption(ifa,isr,iim,11,regs);
}
+ return IA64_NO_FAULT;
}
void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
{
- unsigned long i, * src,* dst, *sunat, *dunat;
+ unsigned long i=0UL, * src,* dst, *sunat, *dunat;
IA64_PSR vpsr;
src=&regs->r16;
sunat=&regs->eml_unat;
}
/* We came here because the H/W VHPT walker failed to find an entry */
-void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
+IA64FAULT
+vmx_hpw_miss(u64 vadr, u64 vec, REGS* regs)
{
IA64_PSR vpsr;
- CACHE_LINE_TYPE type;
+ CACHE_LINE_TYPE type=ISIDE_TLB;
u64 vhpt_adr, gppa;
ISR misr;
ia64_rr vrr;
// REGS *regs;
- thash_cb_t *vtlb, *vhpt;
- thash_data_t *data, me;
+ thash_cb_t *vtlb;
+ thash_data_t *data;
VCPU *v = current;
vtlb=vmx_vcpu_get_vtlb(v);
#ifdef VTLB_DEBUG
if(is_physical_mode(v)&&(!(vadr<<1>>62))){
if(vec==1){
physical_itlb_miss(v, vadr);
- return;
+ return IA64_FAULT;
}
if(vec==2){
if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
}else{
physical_dtlb_miss(v, vadr);
}
- return;
+ return IA64_FAULT;
}
}
vrr = vmx_vcpu_rr(v, vadr);
// prepare_if_physical_mode(v);
- if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
+ if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
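/* Reconstruct the guest-physical address: keep the page offset of vadr,
 * and take the frame base from ppn, which is architected in 4KB units
 * (hence the ps-12 adjustment before shifting back up by ps). */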
gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
emulate_io_inst(v, gppa, data->ma);
}
}
}
+ return IA64_NO_FAULT;
}
-
-
}
return 0;
}
-
-
panic ("Unsupported CR");
+ return 0;
}
void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
{
- u64 value;
ISR isr;
isr.val = set_isr_ei_ni(vcpu);
#include <asm/gcc_intrin.h>
#include <asm/vmx_mm_def.h>
#include <asm/vmx.h>
-
+#include <asm/vmx_phy_mode.h>
//u64 fire_itc;
//u64 fire_itc2;
//u64 fire_itm;
#include <asm/hw_irq.h>
#include <asm/vmx_pal_vsa.h>
#include <asm/kregs.h>
-
//unsigned long last_guest_rsm = 0x0;
struct guest_psr_bundle{
unsigned long ip;
regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );
check_mm_mode_switch(vcpu, old_psr, new_psr);
- return IA64_NO_FAULT;
+ return;
}
/* Adjust slot both in pt_regs and vpd, upon vpsr.ri which
#include <asm/vmmu.h>
#include <asm/vmx_mm_def.h>
#include <asm/smp.h>
-
+#include <asm/vmx.h>
#include <asm/virt_event.h>
+#include <asm/vmx_phy_mode.h>
extern UINT64 privop_trace;
void
*cause=EVENT_BSW_1;
}
}
+ case I:
+ case F:
+ case L:
+ case ILLEGAL:
+ break;
}
}
{
UINT64 tgt = inst.M33.r1;
UINT64 val;
- IA64FAULT fault;
/*
if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
{
UINT64 val;
- IA64FAULT fault;
if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
panic(" get_psr nat bit fault\n");
IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
{
u64 r2,r3;
- ISR isr;
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
}
if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
#ifdef VMAL_NO_FAULT_CHECK
+ ISR isr;
set_isr_reg_nat_consumption(vcpu,0,0);
rnat_comsumption(vcpu);
return IA64_FAULT;
IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
{
u64 r3;
- ISR isr;
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
#ifdef VMAL_NO_FAULT_CHECK
+ ISR isr;
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
set_privileged_operation_isr (vcpu, 0);
IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
{
- ISR isr;
IA64FAULT ret1, ret2;
#ifdef VMAL_NO_FAULT_CHECK
+ ISR isr;
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.cpl != 0) {
IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
{
u64 r1,r3;
+#ifdef CHECK_FAULT
ISR visr;
IA64_PSR vpsr;
-#ifdef CHECK_FAULT
if(check_target_register(vcpu, inst.M46.r1)){
set_illegal_op_isr(vcpu);
illegal_op(vcpu);
IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
{
u64 r1,r3;
+#ifdef CHECK_FAULT
ISR visr;
IA64_PSR vpsr;
- #ifdef CHECK_FAULT
+#endif
+#ifdef CHECK_FAULT
if(check_target_register(vcpu, inst.M46.r1)){
set_illegal_op_isr(vcpu);
illegal_op(vcpu);
IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
{
u64 r1,r3;
- ISR visr;
#ifdef CHECK_FAULT
+ ISR visr;
if(check_target_register(vcpu, inst.M46.r1)){
set_illegal_op_isr(vcpu);
illegal_op(vcpu);
IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
{
u64 r1,r3;
+#ifdef CHECK_FAULT
ISR visr;
IA64_PSR vpsr;
int fault=IA64_NO_FAULT;
-#ifdef CHECK_FAULT
visr.val=0;
if(check_target_register(vcpu, inst.M46.r1)){
set_illegal_op_isr(vcpu);
IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
{
- UINT64 fault, itir, ifa, pte, slot;
- ISR isr;
+ UINT64 itir, ifa, pte, slot;
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.ic ) {
return IA64_FAULT;
}
#ifdef VMAL_NO_FAULT_CHECK
+ ISR isr;
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
set_privileged_operation_isr (vcpu, 0);
IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
{
- UINT64 fault, itir, ifa, pte, slot;
+ UINT64 itir, ifa, pte, slot;
+#ifdef VMAL_NO_FAULT_CHECK
ISR isr;
+#endif
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
if ( vpsr.ic ) {
IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
{
- UINT64 fault;
- ISR isr;
IA64_PSR vpsr;
IA64FAULT ret1;
}
#ifdef VMAL_NO_FAULT_CHECK
+ UINT64 fault;
+ ISR isr;
if ( vpsr.cpl != 0) {
/* Inject Privileged Operation fault into guest */
set_privileged_operation_isr (vcpu, 0);
IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
{
- u64 r2,cr3;
+ u64 r2;
#ifdef CHECK_FAULT
IA64_PSR vpsr;
vpsr.val=vmx_vcpu_get_psr(vcpu);
void
vmx_emulate(VCPU *vcpu, REGS *regs)
{
- IA64_BUNDLE bundle;
- int slot;
- IA64_SLOT_TYPE slot_type;
IA64FAULT status;
INST64 inst;
UINT64 iip, cause, opcode;
iip = regs->cr_iip;
- IA64_PSR vpsr;
cause = VMX(vcpu,cause);
opcode = VMX(vcpu,opcode);
#endif
#ifdef BYPASS_VMAL_OPCODE
// make a local copy of the bundle containing the privop
+ IA64_BUNDLE bundle;
+ int slot;
+ IA64_SLOT_TYPE slot_type;
+ IA64_PSR vpsr;
bundle = __vmx_get_domain_bundle(iip);
slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
if (!slot) inst.inst = bundle.slot0;
status=vmx_emul_mov_from_cpuid(vcpu, inst);
break;
case EVENT_VMSW:
- printf ("Unimplemented instruction %d\n", cause);
+ printf ("Unimplemented instruction %ld\n", cause);
status=IA64_FAULT;
break;
default:
- printf("unknown cause %d, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
+ printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
while(1);
/* For unknown cause, let hardware to re-execute */
status=IA64_RETRY;
static int
__is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
{
- uint64_t size1,size2,sa1,ea1,ea2;
+ uint64_t size1,sa1,ea1;
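/* Interval-overlap sketch (assumed shape of the check below): with
 * sa1/ea1 the entry's start and end addresses, [sa1,ea1] overlaps
 * [sva,eva] iff sa1 <= eva && sva <= ea1. */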
if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) {
return 0;
ASSERT ( hcb->ht == THASH_VHPT );
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
- mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
+ mfn = (unsigned long)(hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
if ( mfn == INVALID_MFN ) return 0;
// TODO with machine discontinuous address space issue.
- vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
+ vhpt->etag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, tlb->vadr);
//vhpt->ti = 0;
vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
u64 gppn;
u64 ppns, ppne;
- hash_table = (hcb->hash_func)(hcb->pta, va);
+ hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
if( INVALID_ENTRY(hcb, hash_table) ) {
*hash_table = *entry;
hash_table->next = 0;
static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
thash_data_t vhpt_entry, *hash_table, *cch;
- ia64_rr vrr;
+
if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
panic("Can't convert to machine VHPT entry\n");
}
- hash_table = (hcb->hash_func)(hcb->pta, va);
+ hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
if( INVALID_ENTRY(hcb, hash_table) ) {
*hash_table = vhpt_entry;
hash_table->next = 0;
void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
{
- thash_data_t *hash_table;
+ //thash_data_t *hash_table;
ia64_rr vrr;
vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
{
thash_data_t *hash_table, *p, *q;
thash_internal_t *priv = &hcb->priv;
- int idx;
hash_table = priv->hash_base;
if ( hash_table == entry ) {
static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
{
- thash_data_t *hash_table, *p, *q;
- thash_internal_t *priv = &hcb->priv;
- int idx;
if ( !entry->tc ) {
return rem_tr(hcb, entry->cl, entry->tr_idx);
}
}
-
/*
* Find an overlap entry in hash table and its collision chain.
* Refer to SDM2 4.1.1.4 for overlap definition.
{
thash_data_t *hash_table;
thash_internal_t *priv = &hcb->priv;
- u64 tag;
ia64_rr vrr;
priv->_curva = va & ~(size-1);
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
- hash_table = (hcb->hash_func)(hcb->pta, priv->_curva);
+ hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, priv->_curva);
priv->s_sect = s_sect;
priv->cl = cl;
priv->_tr_idx = 0;
priv->rid = rid;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
priv->ps = vrr.ps;
- hash_table = (hcb->hash_func)( hcb->pta, priv->_curva);
- tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+ hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
+ tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
priv->tag = tag;
priv->hash_base = hash_table;
priv->cur_cch = hash_table;
tr = &DTR(hcb,0);
}
for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
- if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
+ if ( __is_tlb_overlap(hcb, &tr[(unsigned)priv->_tr_idx],
priv->rid, priv->cl,
priv->_curva, priv->_eva) ) {
- return &tr[priv->_tr_idx++];
+ return &tr[(unsigned)priv->_tr_idx++];
}
}
return NULL;
{
thash_data_t *ovl;
thash_internal_t *priv = &hcb->priv;
- u64 addr,rr_psize;
+ u64 rr_psize;
ia64_rr vrr;
if ( priv->s_sect.tr ) {
}
}
priv->_curva += rr_psize;
- priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
+ priv->hash_base = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
priv->cur_cch = priv->hash_base;
}
return NULL;
{
thash_data_t *ovl;
thash_internal_t *priv = &hcb->priv;
- u64 addr,rr_psize;
+ u64 rr_psize;
ia64_rr vrr;
vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
}
}
priv->_curva += rr_psize;
- priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
- priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+ priv->hash_base = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
+ priv->tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
priv->cur_cch = priv->hash_base;
}
return NULL;
CACHE_LINE_TYPE cl)
{
thash_data_t *hash_table, *cch;
- u64 tag;
ia64_rr vrr;
ASSERT ( hcb->ht == THASH_VTLB );
if ( cch ) return cch;
vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
- hash_table = (hcb->hash_func)( hcb->pta, va);
+ hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, va);
if ( INVALID_ENTRY(hcb, hash_table ) )
return NULL;
*/
void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
{
- thash_cb_t *vhpt;
+// thash_cb_t *vhpt;
search_section_t s_sect;
s_sect.v = 0;
thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
machine_tlb_purge(entry->vadr, entry->ps);
+ return;
}
/*
cch_mem_init (hcb);
hcb->magic = THASH_CB_MAGIC;
- hcb->pta.val = hcb->hash;
+ hcb->pta.val = (unsigned long)hcb->hash;
hcb->pta.vf = 1;
hcb->pta.ve = 1;
hcb->pta.size = sz;
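/* Background note (IA64 PTA fields per the architecture manual; stated
 * here as reference, not patch content): ve enables the VHPT walker,
 * size encodes the table size as a power of two, and vf selects the
 * long format; pta.val was seeded above with the hash base address. */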
// vb2 = vb1 + vtlb->hash_sz;
hash_num = vhpt->hash_sz / sizeof(thash_data_t);
// printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
- printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n",
+ printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n",
vtlb, vtlb->hash,vtlb->hash_sz,
vhpt, vhpt->hash, vhpt->hash_sz);
//memcpy(vb1, vtlb->hash, vtlb->hash_sz);
}
hash ++;
}
- printf("Done vtlb entry check, hash=%lp\n", hash);
+ printf("Done vtlb entry check, hash=%p\n", hash);
printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
invalid_ratio = 1000*check_invalid / hash_num;
printf("%02ld.%01ld%% entries are invalid\n",
if ( !INVALID_ENTRY(vhpt, hash) ) {
for ( cch= hash; cch; cch=cch->next) {
if ( !cch->checked ) {
- printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, cch);
+ printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
check_fail_num ++;
}
else {
printf("Dump vTC\n");
for ( i = 0; i < hash_num; i++ ) {
if ( !INVALID_ENTRY(vtlb, hash) ) {
- printf("VTLB at hash=%lp\n", hash);
+ printf("VTLB at hash=%p\n", hash);
for (cch=hash; cch; cch=cch->next) {
- printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+ printf("Entry %p va=%lx ps=%d rid=%d\n",
cch, cch->vadr, cch->ps, cch->rid);
}
}
printf("Dump vDTR\n");
for (i=0; i<NDTRS; i++) {
tr = &DTR(vtlb,i);
- printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+ printf("Entry %p va=%lx ps=%d rid=%d\n",
tr, tr->vadr, tr->ps, tr->rid);
}
printf("Dump vITR\n");
for (i=0; i<NITRS; i++) {
tr = &ITR(vtlb,i);
- printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+ printf("Entry %p va=%lx ps=%d rid=%d\n",
tr, tr->vadr, tr->ps, tr->rid);
}
printf("End of vTLB dump\n");